#loading packages
library(ezids)
library(ggplot2)
library(ggrepel)
library(gridExtra)
library(tibble)
library(dplyr)
library(tidyr)
library(psych)
library(corrplot)
library(lattice)
library(FNN)
library(caret)
library(pROC)
#loading data
#NOTE: read.csv() already returns a data.frame, so the original
#data.frame(read.csv(...)) wrapper was redundant and has been dropped
NYweath <- read.csv("data/NYC_weather_1869_2022.csv")
#converting to R date format and adding columns for day, month, and year
NYweath$DATE <- as.Date(NYweath$DATE)
NYweath$day <- format(NYweath$DATE, format="%d")
NYweath$month <- format(NYweath$DATE, format="%m")
NYweath$year <- format(NYweath$DATE, format="%Y")
#converting temperature observations (and year) to numeric
NYweath$TMAX <- as.numeric(NYweath$TMAX)
NYweath$TMIN <- as.numeric(NYweath$TMIN)
NYweath$TAVG <- as.numeric(NYweath$TAVG)
NYweath$year <- as.numeric(NYweath$year)
#Making month a factor (levels "01".."12", zero-padded by format() above)
NYweath$month <- as.factor(NYweath$month)
# subset data to desired variables
NYweath_sub <- subset(NYweath, select = c(DATE, day, month, year, TMAX, TMIN, TAVG, PRCP, SNOW))
#creating a subset for 1900 on
NYweath_00 <- subset(NYweath_sub, year > 1899)
xkabledplyhead(NYweath_00)
#new data loading from Emily, New York State population from 1900 on
NYSPop_1900 <- read.csv("data/NYSPop_1900-2021.csv")
NYSPop_1900$Population <- as.numeric(NYSPop_1900$Population)
NYSPop_1900$Year <- as.numeric(NYSPop_1900$Year)
#creating a subset for 1900-2021 (the years covered by the population data)
NYweath_00a <- subset(NYweath_sub, year > 1899 & year < 2022)
#attach the state population for each observation's year.
#BUGFIX/idiom: the original element-by-element for-loop over
#1:length(NYweath_00a$year) grew the Pop column one value at a time (and
#would error on any year absent from NYSPop_1900); match() performs the
#same lookup vectorized, yielding NA for unmatched years instead
NYweath_00a$Pop <- NYSPop_1900$Population[match(NYweath_00a$year, NYSPop_1900$Year)]
#New data loading from Emily, shootings
NYshoot <- read.csv("data/Shooting_Counts_ERG.csv")
#converting to R date format
NYshoot$DATE <- as.Date(NYshoot$Date, format = "%m/%d/%Y")
#cleaning shooting data: restrict weather data to 2006-2021, then merge on DATE
NYweathshoot_06 <- subset(NYweath_00a, year > 2005 & year < 2022)
NYweathshoot_06 <- full_join(NYshoot, NYweathshoot_06, by = "DATE")
#rebuild day/month/year from DATE so rows present only in NYshoot get them too
NYweathshoot_06$day <- as.numeric(format(NYweathshoot_06$DATE, format="%d"))
NYweathshoot_06$month <- as.factor(format(NYweathshoot_06$DATE, format="%m"))
#BUGFIX: as.numeric() has no 'format' argument; the original
#as.numeric(x, format="%Y") silently ignored it — the conversion belongs to
#format() on the Date, then as.numeric() on the resulting character year
NYweathshoot_06$year <- as.numeric(format(NYweathshoot_06$DATE, format="%Y"))
#dates with no shooting record are counted as zero shootings, not missing
NYweathshoot_06 <- NYweathshoot_06 %>% mutate(Shootings = ifelse(is.na(Shootings), 0, Shootings))
summary(NYweathshoot_06)
##CW ADD: Adding a 'TOT_PRCP' column (not row) that sums the PRCP and SNOW
##measurements for each day. This column will be used in Question 3.
##NOTE(review): PRCP is liquid precipitation while SNOW is snowfall depth, so
##the two may be in different effective units -- confirm the sum is meaningful.
NYweath_final <- NYweath_00
NYweath_final$TOT_PRCP <- NYweath_00$PRCP + NYweath_00$SNOW
In this project, we are digging into the relationship between human activity and weather in New York City. Our three driving questions are:
For our first project, we analyzed daily weather patterns from data collected at a weather station in Central Park, New York City made available online by the National Oceanic and Atmospheric Administration. Through our analysis, we confirmed that there was a statistically significant rise in daily maximum temperatures in Central Park over the last 122 years.
We performed an ANOVA test on daily maximum temperature values over different periods of time and found statistically significant results regarding variance in-between our samples. This led us to create linear models for the change in daily maximum temperature over time, revealing statistically significant warming at an average rate of about 0.025 degrees Fahrenheit per year from 1900-2022. This is in fact a larger increase in temperature than the average global warming trend reported by NOAA (an average of 0.014 degrees Fahrenheit per year). However, since 1982, average temperatures in Central Park have increased significantly less than average global warming, perhaps because much of the development in New York City took place during the first half of the century.
We had more questions about relationships between weather and human activity, which are explored here in our Final Project.
For this project, we looked more directly at correlations between human activity and weather by incorporating new datasets related to population, air quality, crime (shootings and arrests), the stock market, and COVID-19.
Emily will re-do linear regression looking at measures of local and global human activity as regressors rather than year. She might also look into variable transformations (i.e., linear models fit to polynomials of regressors) to see if the response is best fit as linear or polynomial.
At the end of our exploratory data analysis, we developed a linear model of maximum daily temperature over time, with year as a linear regressor. This revealed to us that there is a statistically significant increase in average maximum temperatures over time. However, we do not suspect that time is the cause– rather, it is something else that has changed over time that has caused the warming in New York. We wanted to explore correlations with other, more direct proxies for human activity.
Our original fit used year as a numerical regressor and month as a categorical regressor. The resulting fit has an r-squared value of 0.775 and a slope of 0.025 degrees Fahrenheit per year, with all fit parameters’ p-values well below 0.05. The different intercepts for each level of the categorical variable (the twelve months of the year) indicated that January is the coldest and July the hottest month in Central Park, with an average difference in maximum daily temperature of approximately 46 degrees Fahrenheit in any given year over this window.
#linear model of daily maximum temperature on the 1900-2021 subset, with
#year as a numeric regressor and month as a categorical (factor) regressor
maxTfit00_ym <- lm(formula = TMAX ~ year + month, data = NYweath_00a )
#residuals retained for later diagnostics
res00_ym <- residuals(maxTfit00_ym)
summary(maxTfit00_ym)
The two extremes and their linear models are plotted in the following figure.
#plot of just July and January, with their fitted regression lines
ggplot(NYweath_00a, aes(x = year, y = TMAX, color = month)) +
  geom_point() +
  #only January and July are colored; other months get NA and are not shown
  scale_color_manual(values = c("01" = "purple4",
                                "07" = "red"), na.value = NA) +
  #intercepts and shared slope hard-coded from the maxTfit00_ym fit:
  #January intercept, July intercept, slope 0.02539 degrees F per year
  geom_abline(aes(intercept = -11.05508, slope = 0.02539), col = "black", size = 1) +
  geom_abline(aes(intercept = 34.98295, slope = 0.02539), col = "black", size = 1) +
  #BUGFIX: the original set labels twice -- labs() followed by xlab()/ylab()/
  #ggtitle() overriding them; only the surviving (final) labels are kept
  labs(
    x = "Year",
    y = "Maximum daily temperature",
    title = "Maximum Daily Temperature in Central Park")
Do other weather variables correlate to TMAX?
#correlation among year and the numeric weather variables
NYweath_cor <- subset(NYweath_00a, select = c(year, TMAX, PRCP, SNOW))
str(NYweath_cor)
#pairwise-complete so rows missing PRCP or SNOW still contribute elsewhere
weathcor <- cor(NYweath_cor, use = "pairwise.complete.obs")
corrplot::corrplot(weathcor)
#BUGFIX: removed a stray bare `cor` expression, which printed the source code
#of the cor() function rather than any correlation result
We have found a reasonable linear model for temperature over time, but can we look instead at the connection to human activities, rather than time? Can we use some aspect of human activity as a regressor and generate a reasonable model?
We looked to the Census for U.S. population data, but that is only reported decennially, so we looked for other sources. We found historical data back to 1960 for New York state online https://www.macrotrends.net/cities/23083/new-york-city/population. Because this source is not known to us, we validated it against decennial census data.
A bunch of linear models…
#LM1: month only -- baseline seasonal model of daily maximum temperature
maxTfit00_m <- lm(formula = TMAX ~ month, data = NYweath_00a)
summary(maxTfit00_m)
#LM4: year + month + precipitation as regressors
maxTfit00_all <- lm(formula = TMAX ~ year + month + PRCP, data = NYweath_00a)
summary(maxTfit00_all)
#exploratory variants below were tried and commented out; kept for reference
#maxTfit00_all_intrxn <- lm(formula = TMAX ~ year + month*day + PRCP + SNOW, data = NYweath_00a)
#summary(maxTfit00_all)
#anova(maxTfit00_m, maxTfit00_ym)
#anova(maxTfit00_all, maxTfit00_all_intrxn)
#LM2: population (instead of year) as the proxy for human activity
maxTfit00_pop <- lm(formula = TMAX ~ Pop + month, data = NYweath_00a)
summary(maxTfit00_pop)
#maxTfit00_pop_all <- lm(formula = TMAX ~ Pop + month + PRCP, data = NYweath_00a)
#summary(maxTfit00_pop)
#plot of NYS Pop over time
ggplot(NYweath_00a, aes(x = year, y = Pop)) +
  geom_point() +
  # geom_abline(aes(intercept = -11.05508, slope = 0.02539), col = "black", size = 1) +
  #BUGFIX: labs() was immediately overridden by duplicate xlab()/ylab()/
  #ggtitle() calls; only the surviving labels are kept (note the effective
  #title was the ggtitle() one: "...in New York State", not "...of...")
  labs(
    x = "Year",
    y = "New York State Population",
    title = "Annual Population in New York State")
The AQI is an index for reporting daily air quality. It tells how clean or polluted the air is, and what associated health effects might be a concern for the public. The higher the AQI value, the greater the level of air pollution and the greater the health concern. Outdoor concentrations of pollutants such as Ozone, Carbon Monoxide, Nitrogen Dioxide, Sulfur Dioxide, and PM2.5/PM10 concentrations are measured at stations across New York City and reported to the EPA. The Daily Air Quality Index (AQI) is calculated based on these concentration values and stored within the EPA’s Air Quality System database.
As city life changes, so does its air quality. Sources of emissions such as traffic and burning of fossil fuels for energy generation can cause air quality to deteriorate. Emissions can also contribute to global warming by releasing more greenhouse gasses into the atmosphere, thus increasing average temperatures. As more people migrate to urban areas, we will continue to see a deterioration in air quality unless measures are taken. The goal for integrating this data is to study the effects of weather patterns on air quality, and to statistically verify changes in air quality over time in New York City.
The dataset contains about 7,000 observations collected from January, 2000 to October, 2022.
We start by looking at the distribution of our variables of interest: AQI.
# distribution plot of daily AQI with its mean marked
mean_aqi <- mean(DailyAQ_00_22$AQI)
ggplot(DailyAQ_00_22) +
  #BUGFIX: the original passed both bins=50 and binwidth=5; binwidth always
  #takes precedence in geom_histogram(), so the contradictory bins is removed
  geom_histogram(aes(x=AQI), na.rm=TRUE, alpha=0.5, color="black", fill='#2DD164', binwidth=5) +
  #dashed vertical line at the mean, labeled with its rounded value
  geom_vline(xintercept=mean_aqi, color="black", size=1, linetype=5, show.legend=FALSE) +
  annotate("text", x=mean_aqi + 9, y=625, label=paste(round(mean_aqi, 2)), angle=0, size=4, color="black") +
  labs(title="Distribution of Daily AQI Level", x="", y="Count")
In the histogram depicting the distribution of AQI, we can gauge that
the distribution is fairly normal. Although it is slightly right-skewed,
the number of data points suggests we can treat it as normal for our
modeling. The right-skewness is caused by days with unusually high AQI
values.
#year-over-year AQI growth-rate line plot (aqi_diffRate computed upstream)
ggplot(DailyAQ_00_22_Yearly_Growth, aes(group=1)) +
  geom_line(aes(x = year, y = aqi_diffRate), na.rm = TRUE, stat="identity", color="#043008", size=1) +
  geom_point(aes(x = year, y = aqi_diffRate), na.rm = TRUE, fill="#E6E930", shape = 23) +
  labs(title="AQI year-over-year rate in NYC", x="Year", y="AQI") +
  #BUGFIX: dropped the axis.title.y.right styling -- this plot defines no
  #secondary (right-hand) axis, so that theme entry was dead code
  theme(
    axis.title.y = element_text(color = "#043008", size = 13)
  )
The year-over-year growth rate was also calculated based on yearly average AQI and is depicted in the 2nd line plot. We see an alternating pattern between years that increases in variance as we move towards 2022.
In order to evaluate correlation between weather and air quality, we combined our dataset with the NYC weather data based on the date value in each. Dates without a matching air quality measurement are dropped. Subsequent models will be built using this merged dataframe.
# join daily AQI to weather on DATE (inner merge: unmatched dates dropped),
# then keep only the columns used downstream
DailyAQ_merged <- merge(DailyAQ_00_22, NYweath_00, by = "DATE")
keep_cols <- c('DATE', 'year.x', 'month', 'AQI', 'TMAX', 'TMIN', 'PRCP', 'SNOW')
DailyAQ_merged <- DailyAQ_merged[ , keep_cols]
# merge() suffixed the duplicated year column as year.x; restore the plain name
colnames(DailyAQ_merged)[2] <- "year"
str(DailyAQ_merged)
xkablesummary(DailyAQ_merged)
The first step to building linear models is assessing correlation between numerical variables in the data. Because the year variable in our dataset begins at 2000, it will unnecessarily scale our coefficients when used in linear modeling. We properly scaled the variable to start at 0 (and continue to 22 to represent 2022).
The correlation is evaluated via a pairs plot, which depicts the correlation coefficient between numerical variables and scatterplots of their relationships. The pairs plot uses the Pearson correlation method.
# restrict to the numerical variables and re-origin year at 2000 so model
# coefficients are not inflated by the raw four-digit year values
numeric_cols <- c('AQI', 'TMAX', 'TMIN', 'PRCP', 'SNOW', 'year')
DailyAQ_numerics <- DailyAQ_merged[ , numeric_cols]
DailyAQ_numerics$year <- DailyAQ_numerics$year - 2000
# Pearson pairs plot: correlations, scatterplots with lm lines, densities
pairs.panels(DailyAQ_numerics,
             method = "pearson",   # correlation method
             hist.col = "red",     # histogram color
             density = TRUE,       # density overlays
             ellipses = TRUE,      # correlation ellipses
             smoother = TRUE,
             lm = TRUE,
             main = "Pairs Plot Of Weather and Air Quality Numerical Variables",
             cex.labels = 0.75)
From the Pearson pairs plot above, we can see a moderately high, negative correlation value between year and AQI. This indicates that as the year increases, the AQI is actually dropping, resulting in better air quality in the city.
To better observe the effects of year on AQI, we can visualize the yearly average AQI.
# yearly average daily AQI as a line-plus-point plot
yearly_avg_aes <- aes(x = year, y = aqi_avg)
ggplot(DailyAQ_00_22_Yearly_Growth) +
  geom_line(yearly_avg_aes, stat = "identity", color = "#2DD164", size = 1) +
  geom_point(yearly_avg_aes, na.rm = TRUE, fill = "#457108", shape = 21) +
  labs(title = "Average AQI by year in NYC", x = "Year", y = "AQI value")
The line plot confirms the correlation value we observed in the pairs plot. The average yearly AQI is indeed decreasing as year increases. To further test our observations, we will build a linear model using year as a regressor to estimate daily AQI.
# simple linear regression: daily AQI as a function of the re-origined year
aqi_fit <- lm(AQI ~ year, data = DailyAQ_numerics)
summary(aqi_fit)
fit_label <- paste("First Linear Model: ", format(formula(aqi_fit)))
xkabledply(aqi_fit, title = fit_label)
The results of our linear model reveal a significant value for both the intercept and year coefficient. The coefficient value for the year regressor, -1.775, indicates that for every one-year increase, the predicted daily AQI decreases by about 1.78 points. This supports the correlation coefficient we saw earlier between these two variables. The p-value of the F-statistic is also significant, but the \(R^2\) value of the model is a measly 0.28. Based on this model, the year only explains 28% of the variability in daily AQI measurements. This is not a significantly high result. Looking at the scatterplot of the relationship can help explain the weak fit.
# every daily AQI observation by year, jittered, with a fitted OLS trend line
ggplot(DailyAQ_00_22, aes(x = year, y = AQI)) +
  geom_point(alpha = 0.5, color = "#2DD164", position = "jitter") +
  geom_smooth(method = 'lm', formula = 'y ~ x', color = "black", fill = "black") +
  labs(x = "Year", y = "AQI Value", title = "Daily AQI Values From 2000-2022 With Trend Line")
As we can see, there is a high degree of noise when observing daily AQI values at the yearly level. Although the plot displays a slightly downward trend in daily AQI, the model fit is distorted by this noise. This helps explain the results we received from our linear model.
Can we add more or different predictors to improve the fit? In our first analysis, we looked at linear trends of TMAX over time and determined a slight positive correlation observed over the years 1900-2022. We also utilized month as a categorical regressor to help explain the variance in daily maximum temperatures. Based on those results, we concluded that seasonality trends had a negative impact on model performance. Perhaps seasonality also plays a part in daily AQI measurements?
# NYC weather - average TMAX (and TMIN) per calendar month
NYweath_Monthly_Avg <- NYweath_00 %>%
  group_by(month) %>%
  summarize(avg_max_temp = mean(TMAX, na.rm = TRUE),
            avg_min_temp = mean(TMIN, na.rm = TRUE))
# one point per month, connected, with every month labeled on the x axis
ggplot(NYweath_Monthly_Avg, aes(x = as.numeric(month), y = avg_max_temp)) +
  geom_line(color = "#F21E1E", size = 2) +
  geom_point(na.rm = TRUE, fill = "#126BF4", shape = 21, size = 4) +
  labs(title = "Average TMAX By Month in NYC", x = "Month", y = "Temperature (°F)") +
  scale_x_continuous(name = "Month", breaks = 1:12)
To refresh our memories, we included the monthly average daily maximum temperature. A seasonal trend can be observed as temperatures increase during summer months and decrease during winter months.
# average AQI per calendar month across all years
DailyAQ_monthly <- DailyAQ_merged %>%
  group_by(month) %>%
  summarize(aqi_avg = mean(AQI, na.rm = TRUE))
# line-plus-point plot of the monthly averages, one tick per month
ggplot(DailyAQ_monthly, aes(x = as.numeric(month), y = aqi_avg)) +
  geom_line(color = "#47ABE9", size = 2) +
  geom_point(na.rm = TRUE, fill = "#C10808", shape = 21, size = 4) +
  labs(title = "Average AQI By Month in NYC", x = "Month", y = "AQI") +
  scale_x_continuous(name = "Month", breaks = 1:12)
Plotting the average AQI by month, we can see seasonal trends as AQI values are generally high during winter and summer months but low in the months between.
Based on this, we can modify our last model and attempt to fit
seasonality by adding month as a categorical regressor,
along with our variable-of-interest from the last project - TMAX.
# second model: daily AQI regressed on TMAX plus month as a categorical term
aqi_fit2 <- lm(AQI ~ TMAX + month, data = DailyAQ_merged)
summary(aqi_fit2)
model_label <- paste("Second Linear Model: ", format(formula(aqi_fit2)))
xkabledply(aqi_fit2, title = model_label)
The regression coefficient for TMAX is significant and positively correlated, with each degree Fahrenheit increase resulting in AQI increasing by 0.68 points. The regression coefficients for all month categories are also significant. In fact, every month has a negative impact on AQI when compared to January. September exhibits the largest difference, with a predicted AQI almost 44 points lower than January!
The p-value of the model’s F-statistic is also significant, allowing
us to reject the null hypothesis and conclude that there’s a significant
relationship between our chosen predictors and the daily AQI value.
However, the \(R^2\) for our model is
only .149, which is weaker than our previous model. This
indicates that only 14.9% of the variation in daily AQI can be explained
by TMAX and month.
# model VIF scores: variance inflation factors for aqi_fit2's regressors
# (presumably xkablevif comes from the ezids package loaded above -- confirm)
xkablevif(aqi_fit2, title = "Model 2 VIF")
The VIF scores for all regressors are in an acceptable range, however the fit is still poor.
From our results, we can conclude that linear models provide an inaccurate representation of the effects of seasonality within our data.
It seems that due to seasonality, and the nature of time-series data, we cannot properly model daily AQI using linear regression. Perhaps a classification technique can be utilized to properly address the seasonal trends. More precisely, we can build a kNN model to classify the month based on daily AQI and maximum temperature values.
We start with plotting the relationship between our chosen predictors and add a layer to discern month within the scatter.
# TMAX vs AQI scatter, colored by month, to eyeball how separable months are
month_scatter <- ggplot(DailyAQ_merged) +
  geom_point(aes(x = AQI, y = TMAX, color = month), alpha = 0.7) +
  scale_color_brewer(palette = "Paired") +
  labs(title = "Daily Maximum Temperature vs Daily Air Quality Index Value Distinguished By Month",
       x = "Daily AQI Value",
       y = "Daily Maximum Temperature (F)")
month_scatter
We can make out minimal distinction of month from the scatterplot above, but the model will provide a more detailed analysis.
The kNN model requires us to center and scale our predictor values, as they are recorded in vastly different units of measurement. We also need to split our dataset into training and testing frames. We used a 4:1 split to satisfy this requirement.
# center and scale the two predictors (kNN is distance-based, so AQI and TMAX
# must share a common scale before neighbors are computed)
# ROBUSTNESS: columns are selected by name rather than the positional indices
# 4:5 and 3, which silently break if the column order ever changes
DailyAQ_scaled <- as.data.frame(scale(DailyAQ_merged[c("AQI", "TMAX")], center = TRUE, scale = TRUE))
str(DailyAQ_scaled)
# create train and test data sets with a 4:1 split
set.seed(1000)
DailyAQ_sample <- sample(2, nrow(DailyAQ_scaled), replace=TRUE, prob=c(0.80, 0.20))
DailyAQ_training <- DailyAQ_scaled[DailyAQ_sample == 1, ]
DailyAQ_test <- DailyAQ_scaled[DailyAQ_sample == 2, ]
# label sets: month is the class we try to predict
DailyAQ_trainLabels <- DailyAQ_merged[DailyAQ_sample == 1, "month"]
DailyAQ_testLabels <- DailyAQ_merged[DailyAQ_sample == 2, "month"]
nrow(DailyAQ_training)
nrow(DailyAQ_test)
#' Evaluate kNN classification accuracy for a given k.
#'
#' @param k Number of neighbors considered.
#' @param train_set,val_set Scaled predictor frames for training/validation.
#' @param train_class,val_class Class labels for the two sets.
#' @return A 1x2 matrix with columns `k` and `accuracy`.
#' Idiom fix: the original used `=` for assignment; `<-` is conventional R.
evaluateK <- function(k, train_set, val_set, train_class, val_class) {
  # Fixed seed so distance ties break reproducibly across k values.
  set.seed(1000)
  class_knn <- knn(train = train_set,  #<- training set cases
                   test = val_set,     #<- validation set cases
                   cl = train_class,   #<- category for classification
                   k = k)              #<- number of neighbors considered
  # Confusion table; correct predictions sit where row label == column label.
  tab <- table(class_knn, val_class)
  accu <- sum(tab[row(tab) == col(tab)]) / sum(tab)
  cbind(k = k, accuracy = accu)
}
# call evaluateK for each odd k between 1 and 21
# Idiom fix: the original used sapply() and then re-assembled the result by
# row-indexing the transposed matrix; lapply() + do.call(rbind, ...) builds
# the same data frame directly and keeps the column names from evaluateK.
k_grid <- seq(1, 21, by = 2)
knn_k_results <- lapply(k_grid,
                        function(x) evaluateK(x,
                                              train_set = DailyAQ_training,
                                              val_set = DailyAQ_test,
                                              train_class = DailyAQ_trainLabels,
                                              val_class = DailyAQ_testLabels))
knn_different_k <- as.data.frame(do.call(rbind, knn_k_results))
# accuracy as a function of k
ggplot(knn_different_k, aes(x = k, y = accuracy)) +
  geom_line(color = "orange", size = 1.5) +
  geom_point(size = 3) +
  labs(title = "kNN Model Accuracy vs k-value",
       x = "Model k-value",
       y = "Accuracy")
To find the best k-value for our model, we evaluated the model over a range of k from 1 to 21. From the plot above, it seems 13-nearest neighbors is a decent choice since it provides the greatest improvement in predictive accuracy before the incremental improvement trails off.
# set kval: chosen from the accuracy-vs-k sweep above
kval <- 13
# build the final kNN classifier on the scaled predictors
DailyAQ_pred <- FNN::knn(train = DailyAQ_training,
test = DailyAQ_test,
cl = DailyAQ_trainLabels,
k = kval)
# confusion matrix of predicted vs actual month on the held-out test set
DailyAQ_confusionMatrix <- caret::confusionMatrix(DailyAQ_pred, reference = DailyAQ_testLabels)
# overall accuracy pulled out for reporting in the text
DailyAQ_pred_accuracy <- DailyAQ_confusionMatrix$overall['Accuracy']
xkabledply(as.matrix(DailyAQ_confusionMatrix), title = paste("ConfusionMatrix for k = ", kval))
xkabledply(data.frame(DailyAQ_confusionMatrix$byClass), title=paste("k = ", kval))
The overall accuracy of our model is a relatively weak value of 0.257. This indicates that AQI and TMAX were not good predictors of month.
# multiclass ROC on test labels; predictions are coerced to integer class
# codes because multiclass.roc() (pROC) needs a numeric predictor
knnROC <- multiclass.roc(DailyAQ_testLabels, as.integer(DailyAQ_pred))
knnROC
A multiclass ROC evaluation on the test labels yields an AUC value of 0.65, which is higher than expected based on the model’s accuracy value. Still, this is not a significant result based on the AUC threshold of 0.8.